From a7dc0fae6568b8849fe343f0351f30b38484b2e4 Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Fri, 4 Apr 2008 13:54:05 +0100
Subject: [PATCH] AMD IOMMU: Defer IO pagetable construction until device
 assignment

During HVM domain creation, I/O page tables are filled by copying
entries from the p2m table, which is a useless step for domains that
have no passthrough devices. This patch defers I/O page table
construction until the moment of device assignment. If PCI devices are
never assigned or hot-plugged, the unnecessary duplication is avoided
entirely.

Signed-off-by: Wei Wang
---
 xen/drivers/passthrough/amd/iommu_map.c       | 74 ++++++++++++++++---
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |  5 +-
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  1 +
 xen/include/xen/hvm/iommu.h                   |  3 +-
 4 files changed, 71 insertions(+), 12 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index ad5a8517bd..24ff5eacb8 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -388,17 +388,17 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
     unsigned long flags;
     u64 maddr;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
-    int iw, ir;
+    int iw = IOMMU_IO_WRITE_ENABLED;
+    int ir = IOMMU_IO_READ_ENABLED;
 
     BUG_ON( !hd->root_table );
 
-    maddr = (u64)mfn << PAGE_SHIFT;
-
-    iw = IOMMU_IO_WRITE_ENABLED;
-    ir = IOMMU_IO_READ_ENABLED;
-
     spin_lock_irqsave(&hd->mapping_lock, flags);
 
+    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
+        goto out;
+
+    maddr = (u64)mfn << PAGE_SHIFT;
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
     if ( pte == NULL )
     {
@@ -409,7 +409,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
     }
 
     set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
-
+out:
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return 0;
 }
@@ -425,11 +425,17 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
 
     BUG_ON( !hd->root_table );
 
+    spin_lock_irqsave(&hd->mapping_lock, flags);
+
+    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
+    {
+        spin_unlock_irqrestore(&hd->mapping_lock, flags);
+        return 0;
+    }
+
     requestor_id = hd->domain_id;
     io_addr = (u64)gfn << PAGE_SHIFT;
 
-    spin_lock_irqsave(&hd->mapping_lock, flags);
-
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
     if ( pte == NULL )
     {
@@ -486,3 +492,53 @@ int amd_iommu_reserve_domain_unity_map(
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return 0;
 }
+
+int amd_iommu_sync_p2m(struct domain *d)
+{
+    unsigned long mfn, gfn, flags;
+    void *pte;
+    u64 maddr;
+    struct list_head *entry;
+    struct page_info *page;
+    struct hvm_iommu *hd;
+    int iw = IOMMU_IO_WRITE_ENABLED;
+    int ir = IOMMU_IO_READ_ENABLED;
+
+    if ( !is_hvm_domain(d) )
+        return 0;
+
+    hd = domain_hvm_iommu(d);
+
+    spin_lock_irqsave(&hd->mapping_lock, flags);
+
+    if ( hd->p2m_synchronized )
+        goto out;
+
+    for ( entry = d->page_list.next; entry != &d->page_list;
+          entry = entry->next )
+    {
+        page = list_entry(entry, struct page_info, list);
+        mfn = page_to_mfn(page);
+        gfn = get_gpfn_from_mfn(mfn);
+
+        if ( gfn == INVALID_M2P_ENTRY )
+            continue;
+
+        maddr = (u64)mfn << PAGE_SHIFT;
+        pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
+        if ( pte == NULL )
+        {
+            dprintk(XENLOG_ERR,
+                "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
+            spin_unlock_irqrestore(&hd->mapping_lock, flags);
+            return -EFAULT;
+        }
+        set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
+    }
+
+    hd->p2m_synchronized = 1;
+
+out:
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    return 0;
+}
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 8b93e9c3cd..4f562757f7 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -553,8 +553,9 @@ static int reassign_device( struct domain *source, struct domain *target,
 int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
 {
     int bdf = (bus << 8) | devfn;
-    int req_id;
-    req_id = ivrs_mappings[bdf].dte_requestor_id;
+    int req_id = ivrs_mappings[bdf].dte_requestor_id;
+
+    amd_iommu_sync_p2m(d);
 
     if ( ivrs_mappings[req_id].unity_map_enable )
     {
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 8899f27a73..d64913ce58 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -57,6 +57,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
 void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
         unsigned long phys_addr, unsigned long size, int iw, int ir);
+int amd_iommu_sync_p2m(struct domain *d);
 
 /* device table functions */
 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr,
diff --git a/xen/include/xen/hvm/iommu.h b/xen/include/xen/hvm/iommu.h
index 8b11d44c16..f9d0de5a0d 100644
--- a/xen/include/xen/hvm/iommu.h
+++ b/xen/include/xen/hvm/iommu.h
@@ -48,9 +48,10 @@ struct hvm_iommu {
     int domain_id;
     int paging_mode;
    void *root_table;
+    bool_t p2m_synchronized;
 
     /* iommu_ops */
     struct iommu_ops *platform_ops;
 };
 
-#endif // __ASM_X86_HVM_IOMMU_H__
+#endif /* __ASM_X86_HVM_IOMMU_H__ */
-- 
2.30.2
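
For illustration, here is a minimal user-space sketch of the deferral
pattern the patch introduces: a p2m_synchronized flag tested under the
mapping lock, with per-page map requests short-circuited until the
first device assignment performs a one-time bulk copy of the p2m
table. All names (sim_domain, sim_map_page, sim_sync_p2m) are
hypothetical stand-ins rather than Xen APIs, and a pthread mutex
stands in for the hypervisor's IRQ-safe spinlock.

/* Sketch only, not Xen code. Compile with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 8

struct sim_domain {
    pthread_mutex_t mapping_lock;
    bool p2m_synchronized;          /* mirrors hd->p2m_synchronized */
    unsigned long p2m[NR_PAGES];    /* guest p2m table (gfn -> mfn) */
    unsigned long io_pt[NR_PAGES];  /* IO page table, built lazily */
};

/* Analogue of amd_iommu_map_page(): a no-op until first assignment. */
static void sim_map_page(struct sim_domain *d, unsigned long gfn,
                         unsigned long mfn)
{
    pthread_mutex_lock(&d->mapping_lock);
    if ( d->p2m_synchronized )      /* otherwise skip, as the patch does */
        d->io_pt[gfn] = mfn;
    pthread_mutex_unlock(&d->mapping_lock);
}

/* Analogue of amd_iommu_sync_p2m(): one-time bulk copy, then set flag. */
static void sim_sync_p2m(struct sim_domain *d)
{
    pthread_mutex_lock(&d->mapping_lock);
    if ( !d->p2m_synchronized )
    {
        for ( unsigned long gfn = 0; gfn < NR_PAGES; gfn++ )
            d->io_pt[gfn] = d->p2m[gfn];
        d->p2m_synchronized = true;
    }
    pthread_mutex_unlock(&d->mapping_lock);
}

int main(void)
{
    struct sim_domain d = { .mapping_lock = PTHREAD_MUTEX_INITIALIZER };

    for ( unsigned long gfn = 0; gfn < NR_PAGES; gfn++ )
        d.p2m[gfn] = 100 + gfn;     /* populate the guest memory map */

    sim_map_page(&d, 3, 103);       /* skipped: no device assigned yet */
    printf("before assignment: io_pt[3] = %lu\n", d.io_pt[3]);

    sim_sync_p2m(&d);               /* first device assignment */
    sim_map_page(&d, 3, 203);       /* now propagated to the IO table */
    printf("after assignment:  io_pt[3] = %lu\n", d.io_pt[3]);
    return 0;
}

The value of the pattern is that per-page map calls stay cheap for
domains that never receive a device, while the one-time sync brings
the IO page table up to date under the same lock that serializes any
later updates.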